Today's focus and keywords (TAG): kubernetes, k8s, PV, persistent volume, PVC, persistent volume claim, Pod use PVC, GlusterFS, Heketi, SC, StorageClass
Today we will test persistent storage for Kubernetes on bare metal, verifying it with two open-source projects: GlusterFS (the storage layer) and Heketi (a management interface for GlusterFS), and integrating them with the Kubernetes cluster built in the previous days. If the installation fails partway through, refer to the storage-cluster rebuild notes at the bottom of this article, together with the Kubernetes rebuild steps from earlier days. On the GlusterFS side, the main requirement is that the data disks be clean (reportedly it wants a whole partition or disk to itself). Heketi rarely causes problems: start from the template, change the account and password parameters, and it is ready. Resource types with replica requirements are configured through the StorageClass design. For more advanced topics, see the official site (even though the project has been archived); for detailed operations, see the reference article below.
https://blog.csdn.net/chengyuming123/java/article/details/86539986
Name | Role | Service LAN IP | OS_Disk | Data_Disk | Data_Disk Device |
---|---|---|---|---|---|
sdn-k8s-b2-1 | Master | 10.0.0.224 | 120G (SSD) | 500G (HDD) | /dev/sdb |
sdn-k8s-b2-2 | Worker | 10.0.0.225 | 120G (SSD) | 250G (HDD) | /dev/sdb |
sdn-k8s-b2-3 | Worker | 10.0.0.226 | 120G (SSD) | 250G (HDD) | /dev/sdb |
# List the disks and confirm which device is the data disk
sudo fdisk -l
# Wipe all filesystem signatures from the data disk (destructive!)
sudo wipefs -a <disk-device>
# Force the wipe if signatures remain
sudo wipefs -af <disk-device>
# Example for this environment:
sudo wipefs -a /dev/sdb
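After wiping, confirm that no filesystem or LVM signatures remain on the data disk (assuming /dev/sdb as in the table above):
# wipefs without -a only lists remaining signatures; empty output means the disk is clean
sudo wipefs /dev/sdb
lsblk -f /dev/sdb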
# Download and unpack the Heketi binaries (server and CLI)
wget https://github.com/heketi/heketi/releases/download/v10.0.0/heketi-v10.0.0.linux.amd64.tar.gz
tar -zxvf heketi-v10.0.0.linux.amd64.tar.gz heketi/
# Install both binaries into the PATH
cp heketi/heketi /usr/bin/
cp heketi/heketi-cli /usr/bin/
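A quick sanity check that both binaries landed on the PATH (assuming both support the --version flag):
heketi --version
heketi-cli --version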
# Install the GlusterFS 7 client from the official PPA (on every node)
sudo add-apt-repository ppa:gluster/glusterfs-7
sudo apt-get update
sudo apt-get install glusterfs-client
# Verify the installed version
glusterfs --version
root@sdn-k8s-b2-1:~# glusterfs --version
glusterfs 7.7
Repository revision: git://git.gluster.org/glusterfs.git
Copyright (c) 2006-2016 Red Hat, Inc. <https://www.gluster.org/>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
It is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3
or later), or the GNU General Public License, version 2 (GPLv2),
in all cases as published by the Free Software Foundation.
mount.glusterfs -V
root@sdn-k8s-b2-1:~# mount.glusterfs -V
/sbin/mount.glusterfs: 650: shift: can't shift that many
The helper script relies on bash-specific behavior, but /bin/sh on Ubuntu points to dash, which causes the shift error above; fix the shebang:
nano /sbin/mount.glusterfs
Original content
#!/bin/sh
After modification
#!/bin/bash
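Re-running the version check should now print the GlusterFS version instead of the shift error:
mount.glusterfs -V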
# Remove any leftover GlusterFS configuration and state from previous installs
rm -rf /etc/glusterfs /var/lib/glusterd
# Load the device-mapper kernel modules required by GlusterFS/Heketi
sudo modprobe dm_thin_pool
sudo modprobe dm_mirror
sudo modprobe dm_snapshot
# Confirm the modules are loaded
lsmod | grep dm_thin_pool
lsmod | grep dm_mirror
lsmod | grep dm_snapshot
ubuntu@sdn-k8s-b2-1:~$ lsmod | grep dm_thin_pool
dm_thin_pool 69632 0
dm_persistent_data 73728 1 dm_thin_pool
dm_bio_prison 20480 1 dm_thin_pool
ubuntu@sdn-k8s-b2-1:~$ lsmod | grep dm_mirror
dm_mirror 24576 0
dm_region_hash 20480 1 dm_mirror
dm_log 20480 2 dm_region_hash,dm_mirror
ubuntu@sdn-k8s-b2-1:~$ lsmod | grep dm_snapshot
dm_snapshot 40960 0
dm_bufio 28672 2 dm_persistent_data,dm_snapshot
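These modules do not survive a reboot unless they are persisted; one way to load them at boot, using the standard systemd modules-load.d mechanism (the file name here is arbitrary):
# Load the device-mapper modules automatically at boot
cat <<EOF | sudo tee /etc/modules-load.d/glusterfs.conf
dm_thin_pool
dm_mirror
dm_snapshot
EOF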
Port | Purpose |
---|---|
2222 | sshd for the GlusterFS pod |
24007 | Gluster Daemon |
24008 | GlusterFS management |
49152-49251 | Ports that each brick may use |
# Check whether a given port is already occupied
netstat -tupln | grep <port>
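To sweep the fixed ports from the table in one pass (the brick range is omitted for brevity):
# Report any of the required GlusterFS ports that are already occupied
for p in 2222 24007 24008; do
  sudo netstat -tupln | grep ":$p " && echo "port $p already in use"
done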
# Allow pods to be scheduled on the master node (small single cluster)
kubectl taint nodes --all node-role.kubernetes.io/master-
# Fetch the gk-deploy scripts and Kubernetes templates
git clone https://github.com/gluster/gluster-kubernetes.git
cd gluster-kubernetes/deploy
# Adjust the readiness/liveness probes in the DaemonSet template
nano kube-templates/glusterfs-daemonset.yaml
# Comment out this line
- "if command -v /usr/local/bin/status-probe.sh; then /usr/local/bin/status-probe.sh readiness; else systemctl status glusterd.service; fi"
# Add this line
- "systemctl status glusterd.service"
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: glusterfs
  labels:
    glusterfs: daemonset
  annotations:
    description: GlusterFS DaemonSet
    tags: glusterfs
spec:
  template:
    metadata:
      name: glusterfs
      labels:
        glusterfs: pod
        glusterfs-node: pod
    spec:
      nodeSelector:
        storagenode: glusterfs
      hostNetwork: true
      containers:
      - image: gluster/gluster-centos:latest
        imagePullPolicy: IfNotPresent
        name: glusterfs
        env:
        # set GLUSTER_BLOCKD_STATUS_PROBE_ENABLE to "1" so the
        # readiness/liveness probe validate gluster-blockd as well
        - name: GLUSTER_BLOCKD_STATUS_PROBE_ENABLE
          value: "1"
        - name: GB_GLFS_LRU_COUNT
          value: "15"
        - name: TCMU_LOGDIR
          value: "/var/log/glusterfs/gluster-block"
        resources:
          requests:
            memory: 100Mi
            cpu: 100m
        volumeMounts:
        - name: glusterfs-heketi
          mountPath: "/var/lib/heketi"
        - name: glusterfs-run
          mountPath: "/run"
        - name: glusterfs-lvm
          mountPath: "/run/lvm"
        - name: glusterfs-etc
          mountPath: "/etc/glusterfs"
        - name: glusterfs-logs
          mountPath: "/var/log/glusterfs"
        - name: glusterfs-config
          mountPath: "/var/lib/glusterd"
        - name: glusterfs-dev-disk
          mountPath: "/dev/disk"
        - name: glusterfs-dev-mapper
          mountPath: "/dev/mapper"
        - name: glusterfs-misc
          mountPath: "/var/lib/misc/glusterfsd"
        - name: glusterfs-cgroup
          mountPath: "/sys/fs/cgroup"
          readOnly: true
        - name: glusterfs-ssl
          mountPath: "/etc/ssl"
          readOnly: true
        - name: kernel-modules
          mountPath: "/usr/lib/modules"
          readOnly: true
        securityContext:
          capabilities: {}
          privileged: true
        readinessProbe:
          timeoutSeconds: 3
          initialDelaySeconds: 40
          exec:
            command:
            - "/bin/bash"
            - "-c"
            #- "if command -v /usr/local/bin/status-probe.sh; then /usr/local/bin/status-probe.sh readiness; else systemctl status glusterd.service; fi"  # commented out
            - "systemctl status glusterd.service"  # added
          periodSeconds: 25
          successThreshold: 1
          failureThreshold: 50
        livenessProbe:
          timeoutSeconds: 3
          initialDelaySeconds: 40
          exec:
            command:
            - "/bin/bash"
            - "-c"
            #- "if command -v /usr/local/bin/status-probe.sh; then /usr/local/bin/status-probe.sh readiness; else systemctl status glusterd.service; fi"  # commented out
            - "systemctl status glusterd.service"  # added
          periodSeconds: 25
          successThreshold: 1
          failureThreshold: 50
      volumes:
      - name: glusterfs-heketi
        hostPath:
          path: "/var/lib/heketi"
      - name: glusterfs-run
      - name: glusterfs-lvm
        hostPath:
          path: "/run/lvm"
      - name: glusterfs-etc
        hostPath:
          path: "/etc/glusterfs"
      - name: glusterfs-logs
        hostPath:
          path: "/var/log/glusterfs"
      - name: glusterfs-config
        hostPath:
          path: "/var/lib/glusterd"
      - name: glusterfs-dev-disk
        hostPath:
          path: "/dev/disk"
      - name: glusterfs-dev-mapper
        hostPath:
          path: "/dev/mapper"
      - name: glusterfs-misc
        hostPath:
          path: "/var/lib/misc/glusterfsd"
      - name: glusterfs-cgroup
        hostPath:
          path: "/sys/fs/cgroup"
      - name: glusterfs-ssl
        hostPath:
          path: "/etc/ssl"
      - name: kernel-modules
        hostPath:
          path: "/usr/lib/modules"
nano topology.json
{
  "clusters": [
    {
      "nodes": [
        {
          "node": {
            "hostnames": {
              "manage": [
                "sdn-k8s-b2-1"
              ],
              "storage": [
                "10.0.0.224"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb1"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "sdn-k8s-b2-2"
              ],
              "storage": [
                "10.0.0.225"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb1"
          ]
        },
        {
          "node": {
            "hostnames": {
              "manage": [
                "sdn-k8s-b2-3"
              ],
              "storage": [
                "10.0.0.226"
              ]
            },
            "zone": 1
          },
          "devices": [
            "/dev/sdb1"
          ]
        }
      ]
    }
  ]
}
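gk-deploy matches each node's manage hostname against the Kubernetes node names, so it is worth confirming that they agree before deploying:
# The "manage" hostnames above must match these node names exactly
kubectl get nodes -o custom-columns=NAME:.metadata.name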
cp heketi.json.template heketi.json
{
  "_port_comment": "Heketi Server Port Number",
  "port" : "8080",

  "_use_auth": "Enable JWT authorization. Please enable for deployment",
  "use_auth" : false,

  "_jwt" : "Private keys for access",
  "jwt" : {
    "_admin" : "Admin has access to all APIs",
    "admin" : {
      "key" : ""
    },
    "_user" : "User only has access to /volumes endpoint",
    "user" : {
      "key" : ""
    }
  },

  "_glusterfs_comment": "GlusterFS Configuration",
  "glusterfs" : {
    "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
    "executor" : "${HEKETI_EXECUTOR}",

    "_db_comment": "Database file name",
    "db" : "/var/lib/heketi/heketi.db",

    "kubeexec" : {
      "rebalance_on_expansion": true
    },

    "sshexec" : {
      "rebalance_on_expansion": true,
      "keyfile" : "/etc/heketi/private_key",
      "port" : "${SSH_PORT}",
      "user" : "${SSH_USER}",
      "sudo" : ${SSH_SUDO}
    }
  },

  "backup_db_to_kube_secret": false
}
nano ./gk-deploy
Delete the following parameter (newer kubectl versions no longer accept it)
--show-all
Prepare the Kubernetes environment
Run the deployment command
./gk-deploy -g --admin-key <admin-key> --user-key <user-key> topology.json
kubectl get node --show-labels | grep -E "NAME|node"
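If the GlusterFS pods stay in Pending, check that each storage node carries the storagenode=glusterfs label the DaemonSet selects on; gk-deploy normally sets it from topology.json, but it can also be applied by hand:
# Label a node so the GlusterFS DaemonSet schedules onto it
kubectl label node <node-name> storagenode=glusterfs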
kubectl exec <pod> -- glusterfs --version
On the failed node:
-----
# Remove the stale GlusterFS state
rm -rf /etc/glusterfs /var/lib/glusterd
# Resolve the Heketi service address from the cluster API
export HEKETI_CLI_SERVER=$(kubectl get svc/heketi --template 'http://{{.spec.clusterIP}}:{{(index .spec.ports 0).port}}')
curl $HEKETI_CLI_SERVER/hello
Hello from Heketi
echo "export HEKETI_CLI_SERVER=http://$(kubectl get svc heketi -n heketi -o go-template='{{.spec.clusterIP}}'):8080" >> /etc/profile.d/heketi.sh
echo "alias heketi-cli='heketi-cli --user admin --secret <heketi-admin-key>'" >> ~/.bashrc
source /etc/profile.d/heketi.sh
source ~/.bashrc
echo $HEKETI_CLI_SERVER
# Alternatively, authenticate via environment variables
export HEKETI_CLI_USER=admin
export HEKETI_CLI_KEY=<admin-password>
heketi-cli cluster list
heketi-cli cluster info <glusterfs-cluster-id>
heketi-cli topology info
heketi-cli node list
heketi-cli node info <glusterfs-node-id>
heketi-cli volume create --size=2 --replica=3
heketi-cli volume list
heketi-cli volume info <gluster-volume-id>
heketi-cli volume delete <gluster-volume-id>
echo -n "mypassword" | base64
apiVersion: v1
kind: Secret
type: kubernetes.io/glusterfs
metadata:
  name: heketi-secret
  namespace: heketi
data:
  # base64 encoded password.
  key: aXRyaS1kZXYtYWRtaW4=
kubectl create -f heketi-secret.yaml
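A quick check that the Secret landed in the heketi namespace and that the stored key decodes back to the admin key given to gk-deploy:
kubectl get secret heketi-secret -n heketi
# Decode the stored key; it should match the --admin-key value
kubectl get secret heketi-secret -n heketi -o go-template='{{.data.key}}' | base64 -d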
Build the StorageClass
Write the yaml file
storageclass-gluster-heketi.yaml
-----
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: gluster-heketi-storageclass
parameters:
  resturl: "http://10.100.158.73:8080"
  clusterid: "b3a19fbbde8a5703ec5423fa4745a274"
  restauthenabled: "true"
  restuser: "admin"
  secretName: "heketi-secret"
  secretNamespace: "heketi"
  volumetype: "replicate:3"
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
kubectl apply -f storageclass-gluster-heketi.yaml
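Confirm that the StorageClass was created with the glusterfs provisioner:
kubectl get storageclass gluster-heketi-storageclass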
pvc-gluster-heketi.yaml
-----
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-heketi
spec:
  storageClassName: gluster-heketi-storageclass
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
kubectl apply -f pvc-gluster-heketi.yaml
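Dynamic provisioning should complete within a few seconds; the claim should reach Bound and a matching PV should appear:
# Both the claim and the dynamically provisioned volume should show STATUS Bound
kubectl get pvc pvc-gluster-heketi
kubectl get pv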
pod-use-pvc-glusterfs-heketi.yaml
-----
apiVersion: v1
kind: Pod
metadata:
  name: ubuntu-use-pvc
spec:
  containers:
  - name: pod-use-pvc
    image: ubuntu
    command:
    - sleep
    - "600000"
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - name: gluster-volume
      mountPath: "/testSpeed"
      readOnly: false
  volumes:
  - name: gluster-volume
    persistentVolumeClaim:
      claimName: pvc-gluster-heketi
kubectl apply -f pod-use-pvc-glusterfs-heketi.yaml
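Once the pod is Running, the GlusterFS volume should be mounted at /testSpeed; a rough sequential write test with dd (the 1 GiB size is arbitrary):
# Confirm the mount, then time a 1 GiB write to gauge throughput
kubectl exec ubuntu-use-pvc -- df -h /testSpeed
kubectl exec ubuntu-use-pvc -- dd if=/dev/zero of=/testSpeed/testfile bs=1M count=1024 conv=fsync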
Rebuilding the storage cluster
-----
# Reset the Kubernetes node state
kubeadm reset
# Reboot the machine
init 6
# List any filesystem signatures left on the data disk (add -a to wipe them)
wipefs /dev/<disk-device>